# Check requisite packages are installed.
# Console residue captured from a previous run (kept for reference):
# Warning messages:
# 1: Unknown or uninitialised column: `SeedRuns`.
# 2: Unknown or uninitialised column: `SeedRunsNum`.
# 3: Unknown or uninitialised column: `Abund`.
# Attach each required package; character.only is needed because the
# package name is held in a variable rather than given as a bare symbol.
packages <- c(
  "plotly",
  "dplyr"
)
invisible(lapply(packages, library, character.only = TRUE))
# Console output from package loading (mis-encoded quotes repaired):
# package 'plotly' was built under R version 4.0.5
# Loading required package: ggplot2
# package 'ggplot2' was built under R version 4.0.3
# Attaching package: 'plotly'
# The following object is masked from 'package:ggplot2':
#     last_plot
# The following object is masked from 'package:stats':
#     filter
# The following object is masked from 'package:graphics':
#     layout
# package 'dplyr' was built under R version 4.0.4
# Attaching package: 'dplyr'
# The following objects are masked from 'package:stats':
#     filter, lag
# The following objects are masked from 'package:base':
#     intersect, setdiff, setequal, union
This section pulls code almost directly from LM1996-NumPoolCom-QDatMake-2021-05.Rmd.
# Directory that holds the Viking cluster output for this experiment.
dirViking <- file.path(
  getwd(), "LCAB_LawMorton1996-NumericalPoolCommunityScaling5"
)
# One results directory per save date.
saveDates <- c("2021-05-19", "2021-05-21", "2021-05-24")
dirVikingResults <- file.path(dirViking, paste0("save-", saveDates))
# sprintf() template for result files: run-<CombnNum>-<RunSeed>.RDS
resultFormat <- "run-%d-%s.RDS"
# Pull in the experiment settings; presumably this defines `basal`,
# `consumer`, `seedsPrep`, `seedsRun` and `runs` used below -- TODO
# confirm against the settings file itself.
settingsFile <- file.path(
  getwd(),
  "LawMorton1996-NumericalPoolCommunityScaling-Settings5.R"
)
source(settingsFile)
# Build the parameter frame: one row per (basal, consumer) combination.
# Relies on `basal`, `consumer`, `seedsPrep`, `seedsRun` and `runs` from
# the sourced settings file. List columns (EndState*) are filled later.
paramFrame <- with(list(
  b = rep(basal, times = length(consumer)),
  c = rep(consumer, each = length(basal)),
  s1 = seedsPrep[1:(length(basal) * length(consumer))],
  s2 = seedsPrep[
    (length(basal) * length(consumer) + 1):(
      2 * length(basal) * length(consumer))
  ],
  sR = seedsRun
), {
  temp <- data.frame(
    CombnNum = 0,
    Basals = b,
    Consumers = c,
    SeedPool = s1,
    SeedMat = s2,
    SeedRuns = "",
    SeedRunsNum = 0,
    EndStates = I(rep(list(""), length(b))),
    EndStatesNum = 0,
    EndStateSizes = I(rep(list(""), length(b))),
    EndStateSizesNum = NA,
    EndStateAssembly = I(rep(list(""), length(b))),
    EndStateAbundance = I(rep(list(""), length(b))),
    Dataset = "2021-05:5:Connectance",
    DatasetID = 5,
    stringsAsFactors = FALSE
  )
  # Each combination takes `runs` consecutive seeds from sR, stored as CSV.
  # seq_len() is safe even if temp has zero rows (1:nrow would give c(1, 0)).
  for (i in seq_len(nrow(temp))) {
    seeds <- sR[((i - 1) * runs + 1):(i * runs)]
    temp$SeedRuns[i] <- toString(seeds) # CSV
    temp$SeedRunsNum[i] <- length(seeds)
  }
  temp$CombnNum <- seq_len(nrow(temp))
  temp
})
# Note: n + 2 end states. Failure to finish, failure to obtain state, and state.
# Modified from above, but with the abundance recorded.
# For each parameter combination, tally the end states found across all run
# seeds, together with their sizes, assembly records and abundances.
for (i in seq_len(nrow(paramFrame))) {
  # Tallies keyed by the community CSV string; two sentinel keys up front.
  resultsList <- list(
    "No Run" = 0,
    "No State" = 0
  )
  resultsSize <- list(
    "0" = 0
  )
  resultsAssembly <- list(
    "No Run" = data.frame(),
    "No State" = data.frame()
  )
  resultsAbund <- list(
    "No Run" = "",
    "No State" = ""
  )
  seeds <- unlist(strsplit(paramFrame$SeedRuns[i], ', '))
  for (seed in seeds) {
    # A result file may exist in any of the save directories.
    fileName <- file.path(
      dirVikingResults,
      sprintf(resultFormat, paramFrame$CombnNum[i], seed)
    )
    fileName <- fileName[file.exists(fileName)]
    if (length(fileName) >= 1) {
      if (length(fileName) == 2) {
        # Duplicate saves are tolerated only if they hold identical objects.
        temp <- load(fileName[1])
        temp <- eval(parse(text = temp))
        temp2 <- load(fileName[2])
        temp2 <- eval(parse(text = temp2))
        if (!identical(temp, temp2)) {
          stop("2 files, but not identical.")
        }
      } else if (length(fileName) > 2) {
        stop("At least 3 same files.")
      } else {
        temp <- load(fileName)
        temp <- eval(parse(text = temp)) # Get objects.
      }
      if (is.list(temp) && "Result" %in% names(temp)) {
        # Two saved layouts: a Result data frame (take the final community)
        # or the community vector directly.
        if (is.data.frame(temp$Result))
          community <- temp$Result$Community[[nrow(temp$Result)]]
        else
          community <- temp$Result
        size <- toString(length(community))
        if (community[1] != "")
          abund <- toString(temp$Abund[community + 1])
        else
          abund <- ""
        community <- toString(community)
        if (community == "") {
          resultsList$`No State` <- resultsList$`No State` + 1
          resultsSize$`0` <- resultsSize$`0` + 1
        } else if (community %in% names(resultsList)) {
          resultsList[[community]] <- resultsList[[community]] + 1
          resultsSize[[size]] <- resultsSize[[size]] + 1
        } else {
          # First sighting of this community: record assembly and abundance.
          resultsList[[community]] <- 1
          resultsAssembly[[community]] <- temp
          resultsAbund[[community]] <- abund
          # BUG FIX: the original tested `size %in% resultsSize`, comparing
          # against the stored counts rather than the size keys, so an
          # existing size bucket could be wrongly re-initialised to 1
          # (undercounting). Compare against names() instead.
          if (size %in% names(resultsSize)) {
            resultsSize[[size]] <- resultsSize[[size]] + 1
          } else {
            resultsSize[[size]] <- 1
          }
        }
      } else {
        resultsList$`No State` <- resultsList$`No State` + 1
        resultsSize$`0` <- resultsSize$`0` + 1
      }
    } else {
      resultsList$`No Run` <- resultsList$`No Run` + 1
      resultsSize$`0` <- resultsSize$`0` + 1
    }
  }
  paramFrame$EndStates[[i]] <- resultsList
  paramFrame$EndStatesNum[i] <- length(resultsList) - 2 # ! No State, No Run
  paramFrame$EndStateSizes[[i]] <- resultsSize
  paramFrame$EndStateSizesNum[i] <- length(resultsSize) - 1 # ! 0
  paramFrame$EndStateAssembly[[i]] <- resultsAssembly
  paramFrame$EndStateAbundance[[i]] <- resultsAbund
}
# X, Y = Basal and Consumer pool composition; Z (added later) = sizes of
# the end states. One plotting row per distinct end state per combination.
repCounts <- paramFrame$EndStatesNum
plotScalingData <- data.frame(
  CombnNum = rep(paramFrame$CombnNum, repCounts),
  Basals = rep(paramFrame$Basals, repCounts),
  Consumers = rep(paramFrame$Consumers, repCounts),
  Dataset = rep(paramFrame$Dataset, repCounts),
  DatasetID = rep(paramFrame$DatasetID, repCounts)
)
# Communities: flatten the per-combination tallies, then drop the two
# sentinel entries so only genuine end states remain.
comms <- unlist(lapply(paramFrame$EndStates, names))
freqs <- unlist(paramFrame$EndStates)
asmbl <- unlist(paramFrame$EndStateAssembly, recursive = FALSE)
keep <- comms != "No Run" & comms != "No State"
asmbl <- asmbl[keep]
freqs <- freqs[keep]
comms <- comms[keep]
# Strip uninformative steps from each assembly record; the column layout
# differs between saved formats, hence the two branches.
asmbl <- lapply(asmbl, function(record) {
  if (is.null(record)) return(NA)
  if ("Result.Outcome" %in% names(record)) {
    record %>% dplyr::filter(Result.Outcome != "Type 1 (Failure)" &
                               Result.Outcome != "Present")
  } else {
    record$Result %>% dplyr::filter(Outcome != "Type 1 (Failure)" &
                                      Outcome != "Present")
  }
})
plotScalingData$Communities <- comms
plotScalingData$CommunityFreq <- freqs
plotScalingData$CommunitySeq <- asmbl
# Community Size: number of CSV entries in each community string.
plotScalingData$CommunitySize <- vapply(
  strsplit(plotScalingData$Communities, ','), length, integer(1)
)
# For usage by the reader: interactive 3D scatter of community size
# against pool composition, one colour per dataset.
plotScaling <- plotly::plot_ly(
  plotScalingData,
  x = ~Basals,
  y = ~Consumers,
  z = ~CommunitySize,
  color = ~Dataset,
  colors = c("red", "blue", "black")
) %>%
  plotly::add_markers() %>%
  plotly::layout(
    scene = list(
      xaxis = list(type = "log"),
      yaxis = list(type = "log"),
      camera = list(
        eye = list(x = -1.25, y = -1.25, z = .05)
      )
    )
  )
plotScaling
# > runif(1) * 1E8
# [1] 82598679
set.seed(82598679)
mats <- list()
poolsall <- list() # name pools used in save data; be careful!
# Load the saved pool/matrix objects from each Viking directory. load()
# returns the names of the objects it created in the global environment;
# fetch them by name with get() instead of eval(parse(text = ...)).
for (i in seq_along(dirViking)) { # seq_along: safe if dirViking were empty
  temp <- load(file.path(
    dirViking[i],
    paste0("LawMorton1996-NumericalPoolCommunityScaling-PoolMats",
           5, #if (i > 1) i else "",
           ".RDS")
  ))
  mats[[i]] <- get(temp[1])
  poolsall[[i]] <- get(temp[2])
}
pools <- poolsall
# Candidate combinations: those with more than one distinct steady state.
candidateData <- plotScalingData %>%
  dplyr::group_by(CombnNum, Dataset) %>%
  dplyr::mutate(OtherSteadyStates = dplyr::n() - 1) %>%
  dplyr::filter(OtherSteadyStates > 0)
# Display the candidates without the bulky assembly-sequence list column.
candidateData %>% dplyr::select(-CommunitySeq)
Alas, there are 0 rows, so we are done — but perhaps we can close this file with some small thoughts and comments. For instance, we can recycle some earlier code to look at the two largest communities and see whether anything interesting is going on. We'll load the abundances anyway.
# Recompute the per-combination steady-state counts, this time keeping
# every row (the filtered version above was empty).
candidateData <- plotScalingData %>%
  dplyr::group_by(CombnNum, Dataset) %>%
  dplyr::mutate(OtherSteadyStates = dplyr::n() - 1)
# First, check if it is in the paramFrame.
# Second, check if it is in the saved data from the previous.
# Otherwise, ignore it, we'll figure out what it is and why it is missing later.
candidateData$CommunityAbund <- ""
# seq_len() protects against a zero-row candidateData (1:0 would iterate twice).
for (r in seq_len(nrow(candidateData))) {
  # Columns 1:4 identify the paramFrame row; Communities identifies the
  # abundance entry within it. NOTE(review): the original comment said
  # "1:4 ... 5" but the slice takes 1:6 -- the extra columns are unused.
  ID <- candidateData[r, 1:6]
  paramFrameRow <- paramFrame %>% dplyr::filter(
    CombnNum == ID$CombnNum,
    Basals == ID$Basals,
    Consumers == ID$Consumers,
    Dataset == ID$Dataset
  )
  if (is.list(paramFrameRow$EndStateAbundance[[1]])) {
    entry <- which(ID$Communities == names(paramFrameRow$EndStateAbundance[[1]]))
    if (length(entry)) {
      # (The original ended with a redundant next(); dropped.)
      candidateData$CommunityAbund[r] <- paramFrameRow$EndStateAbundance[[1]][[entry]]
    }
  }
}
# Keep only rows whose abundance was actually recovered.
candidateData <- candidateData %>% dplyr::filter(CommunityAbund != "",
                                                 CommunityAbund != "Failure")
# Productivity of each candidate community, computed from its pool,
# interaction matrix, membership and recovered abundances.
candidateData$CommunityProd <- NA
for (r in seq_len(nrow(candidateData))) { # seq_len: safe for 0 rows
  candidateData$CommunityProd[r] <- with(
    candidateData[r, ],
    RMTRCode2::Productivity(
      Pool = pools[[1]][[CombnNum]],
      InteractionMatrix = mats[[1]][[CombnNum]],
      Community = Communities,
      Populations = CommunityAbund
    )
  )
}
candidateData
This section takes code from LM1996-NumPoolCom-FoodWebs-2021-07.Rmd.
# Build an Edges/Vertices food-web description for every candidate row.
# NOTE(review): the "Intrisc+"/"Intrisc-" labels below are misspelled but
# are stored data values -- do not rename without migrating downstream code
# that matches on them.
foodWebs <- vector("list", nrow(candidateData)) # preallocate (was grown per iteration)
for (r in seq_len(nrow(candidateData))) { # seq_len: safe when there are 0 rows
  foodWebs[[r]] <- with(
    candidateData[r, ],
    {
      # Restrict the interaction matrix and pool to this community.
      redCom <- RMTRCode2::CsvRowSplit(Communities)
      redMat <- mats[[1]][[CombnNum]][redCom, redCom]
      redPool <- pools[[1]][[CombnNum]][redCom, ]
      colnames(redMat) <- paste0('s', as.character(redCom))
      rownames(redMat) <- colnames(redMat)
      names(redPool)[1] <- "node"
      redPool$node <- colnames(redMat)
      names(redPool)[3] <- "M"
      Graph <- igraph::graph_from_adjacency_matrix(
        redMat, weighted = TRUE
      )
      Graph <- igraph::set.vertex.attribute(
        Graph, "name", value = colnames(redMat)
      )
      redPool$N <- RMTRCode2::CsvRowSplit(CommunityAbund)
      # For later analysis, take the matrix diagonal.
      redPool$Intraspecific <- diag(redMat)
      GraphAsDataFrame <- igraph::as_data_frame(Graph)
      # Add in abundances for calculating abundance * (gain or loss)
      GraphAsDataFrame <- dplyr::left_join(
        GraphAsDataFrame,
        dplyr::select(redPool, node, N),
        by = c("to" = "node")
      )
      # Split data frame by interaction sign.
      ResCon <- GraphAsDataFrame[GraphAsDataFrame$weight > 0, ]
      ConRes <- GraphAsDataFrame[GraphAsDataFrame$weight < 0, ]
      # Reorder and rename variables.
      ResCon <- dplyr::select(ResCon,
                              to, from, # resource = to, consumer = from,
                              effectPerUnit = weight, resourceAbund = N)
      ConRes <- dplyr::select(ConRes,
                              to, from, # resource = from, consumer = to,
                              effectPerUnit = weight, consumerAbund = N)
      ResCon <- dplyr::mutate(dplyr::group_by(ResCon, from),
                              effectActual = effectPerUnit * resourceAbund,
                              Type = "Exploit+")
      ConRes <- dplyr::mutate(dplyr::group_by(ConRes, from),
                              effectActual = effectPerUnit * consumerAbund,
                              Type = ifelse(from == to,
                                            "SelfReg-",
                                            "Exploit-"))
      # Intrinsic growth (positive reproduction) as self-loop edges.
      IntriG <- with(redPool, data.frame(
        from = node, #resource = node,
        to = node, #consumer = node,
        effectPerUnit = ifelse(ReproductionRate > 0,
                               ReproductionRate, 0),
        effectActual = ifelse(ReproductionRate > 0,
                              N * ReproductionRate, 0),
        Type = "Intrisc+"))
      # Intrinsic loss (negative reproduction) as self-loop edges.
      IntriL <- with(redPool, data.frame(
        from = node, #resource = node,
        to = node, #consumer = node,
        effectPerUnit = ifelse(ReproductionRate < 0,
                               ReproductionRate, 0),
        effectActual = ifelse(ReproductionRate < 0,
                              N * ReproductionRate, 0),
        Type = "Intrisc-"))
      EdgeDataFrame <- dplyr::bind_rows(
        dplyr::select(ResCon, -resourceAbund),
        dplyr::select(ConRes, -consumerAbund),
        IntriG, IntriL
      )
      EdgeDataFrame <- EdgeDataFrame %>% dplyr::rename(
        # Empirically speaking, to and from appear reversed.
        # A consumer (from) should have a negative effect on resource (to),
        # but the organisation so far marks it as positive. We fix this.
        tempname = to,
        to = from
      ) %>% dplyr::rename(
        from = tempname
      ) %>% dplyr::filter(
        # Remove placeholder entries
        effectPerUnit != 0
      ) %>% dplyr::mutate(
        # Useful to keep effects separate
        effectSign = sign(effectPerUnit)
      ) %>% dplyr::group_by( # was a bare group_by(); qualified for consistency
        to, effectSign
      ) %>% dplyr::mutate(
        # Perform the post mortem of the most influential from's
        effectEfficiency = effectPerUnit / sum(effectPerUnit),
        effectNormalised = effectActual / sum(effectActual)
      ) %>% dplyr::arrange(to)
      list(
        Edges = EdgeDataFrame,
        Vertices = redPool
      )
    }
  )
}
Preparatory code:
# Convert an Edges/Vertices list (as built in foodWebs) into a
# cheddar::Community object.
#
# EVList: list with $Edges (edge data frame incl. effectSign, Type) and
#         $Vertices (node data frame with node / M / N columns).
# name:   community title recorded in the cheddar properties.
toCheddar <- function(EVList, name = "") {# Edges Vertices List
  links <- EVList$Edges
  # cheddar does not like "cannibalism": drop self-loops.
  links <- links[
    links$to != links$from,
  ]
  # "[C]olumns called 'resource' and 'consumer' must be given."
  # Positive edges already point resource -> consumer; negative edges point
  # the other way, so the roles are swapped before binding.
  # BUG FIX: removed the trailing comma after the second bind_rows()
  # argument -- it only worked because rlang::list2() tolerates trailing
  # commas, and would be an error under base-R call semantics.
  links <- dplyr::bind_rows(
    links %>% dplyr::filter(effectSign == 1) %>% dplyr::rename(
      resource = from, consumer = to),
    links %>% dplyr::filter(effectSign == -1) %>% dplyr::rename(
      resource = to, consumer = from)
  ) %>% dplyr::select(-Type) # Cheddar confuses node Type and edge Type.
  cheddar::Community(
    nodes = EVList$Vertices,
    properties = list(
      title = name,
      M.units = "masses",
      N.units = "abund"
    ),
    trophic.links = links
  )
}
# Build an igraph from an Edges/Vertices list, optionally restricted to
# edges of one sign (sign = 1: gains, sign = -1: losses, sign = 0: all).
toIGraph <- function(EVList, sign = 0) {
  edges <- EVList$Edges
  if (sign != 0) {
    edges <- edges[edges$effectSign == sign, ]
  }
  igraph::graph_from_data_frame(
    d = edges,
    directed = TRUE,
    vertices = EVList$Vertices
  )
}
# Post-mortem plots and edge table for one food web.
#
# EVList:         Edges/Vertices list (see the foodWebs construction).
# threshold:      numeric cut-off on effectNormalised, or "adaptive" to use
#                 the smallest per-(recipient, sign) maximum, so every
#                 recipient keeps at least its strongest edge of each sign.
# nodeSize:       vertex sizing rule; defaults to "None".
# edgeScale:      multiplier for edge widths in the gain/loss plots.
# reducedTrophic: if TRUE, plot the thresholded trophic web instead of the
#                 full consumption graph.
# Returns the retained (above-threshold) edges, arranged by recipient.
toPostMortem <- function(EVList,
                         threshold = 0, # sets to minimal size edges below
                         nodeSize = c("None", "Abundance", "Size"),
                         edgeScale = 10,
                         reducedTrophic = TRUE) {
  # Guarded with is.character() so numeric thresholds skip tolower().
  if (is.character(threshold) && tolower(threshold) == "adaptive") {
    threshold <- EVList$Edges %>% dplyr::group_by(
      to, effectSign
    ) %>% dplyr::summarise(
      max = max(effectNormalised), .groups = "drop"
    ) %>% dplyr::ungroup() %>% dplyr::pull(max) %>% min()
  }
  theGc <- toCheddar(EVList, name = "Trophic Levels")
  theGi <- toIGraph(EVList)
  theGiGain <- toIGraph(EVList, sign = 1)
  theGiLoss <- toIGraph(EVList, sign = -1)
  theLayout <- igraph::layout.circle(theGi)
  # BUG FIX: match.arg(nodeSize, choices) errors when nodeSize is left at
  # its length-3 default; match.arg(nodeSize) handles the default (picking
  # the first entry, "None") and behaves identically for scalar inputs.
  theSize <- match.arg(nodeSize)
  if (theSize == "Abundance") {
    theVs <- sqrt(igraph::vertex_attr(theGi)$N) * 10
  } else if (theSize == "Size") {
    theVs <- igraph::vertex_attr(theGi)$M
    theVs <- sqrt(theVs / min(theVs)) * 10
  } else if (theSize == "None") {
    theVs <- 15
  }
  theColors <- ifelse(
    igraph::vertex_attr(theGi)$Type == "Basal", "skyblue", "red"
  )
  theBoth <- igraph::edge_attr(theGi)$effectNormalised
  theGain <- igraph::edge_attr(theGiGain)$effectNormalised
  theLoss <- igraph::edge_attr(theGiLoss)$effectNormalised
  # Zero-out sub-threshold effects, then drop those edges from the graphs.
  theBoth[theBoth < threshold] <- 0
  theGain[theGain < threshold] <- 0
  theLoss[theLoss < threshold] <- 0
  # Inform the graphs of which edges are not needed.
  theGi <- igraph::delete_edges(theGi, which(theBoth == 0))
  theGiGain <- igraph::delete_edges(theGiGain, which(theGain == 0))
  theGiLoss <- igraph::delete_edges(theGiLoss, which(theLoss == 0))
  # Remove the same entries so that lengths match.
  theGain <- theGain[theGain > 0]
  theLoss <- theLoss[theLoss > 0]
  theGain <- theGain * edgeScale
  theLoss <- theLoss * edgeScale
  # Restore graphics state even if a plot errors part-way through.
  parold <- par(no.readonly = TRUE)
  on.exit(par(parold), add = TRUE)
  par(mfrow = c(2, 2), # Two Rows, Two Columns
      mar = c(0, 1.5, 1, 0), # Margins, bottom, left, top, right
      oma = c(0.1, 0.1, 0.1, 0.1) # Outer margins.
  )
  cheddar::PlotWebByLevel(
    theGc,
    show.level.lines = TRUE,
    level = "LongWeightedTrophicLevel"
  )
  if (!reducedTrophic) {
    plot(
      theGi,
      layout = theLayout,
      vertex.size = theVs,
      edge.width = 1,
      edge.arrow.size = 0.3,
      edge.arrow.width = 1,
      vertex.color = theColors,
      edge.lty = 2,
      edge.color = "grey",
      edge.arrow.mode = ">",
      main = "Consumption"
    )
  } else {
    EVListRed <- EVList
    EVListRed$Edges <- EVListRed$Edges %>% dplyr::filter(
      effectNormalised >= threshold
    )
    theGc2 <- toCheddar(EVListRed, name = "Strongest Trophic Levels")
    cheddar::PlotWebByLevel(
      theGc2,
      show.level.lines = TRUE,
      level = "LongWeightedTrophicLevel"
    )
  }
  plot(
    theGiGain,
    layout = theLayout,
    vertex.size = theVs,
    edge.width = theGain,
    edge.arrow.size = 0.3,
    edge.arrow.width = 1,
    vertex.color = theColors,
    edge.lty = 2,
    edge.color = "blue",
    edge.arrow.mode = ">",
    main = "Consumer's Gains"
  )
  plot(
    theGiLoss,
    layout = theLayout,
    vertex.size = theVs,
    edge.width = theLoss,
    edge.arrow.size = 0.3,
    edge.arrow.width = 2,
    vertex.color = theColors,
    edge.lty = 3,
    edge.color = "darkred",
    edge.arrow.mode = "<",
    main = "Resource's Losses"
  )
  # Return the retained edges for inspection.
  EVList$Edges %>% dplyr::ungroup() %>% dplyr::filter(
    effectNormalised >= threshold
  ) %>% dplyr::select(
    -effectSign
  ) %>% dplyr::arrange(
    to, -effectNormalised
  )
}
thresholdEdges <- 0.3
We use a threshold of 0.3 at first, followed by an adaptive threshold. For the adaptive case, we use the smallest largest effect of a given type for a given recipient. To break that down: the largest effect of a given type can serve as a proxy for how specialist a given recipient's interactions are, and the smallest of these can be thought of as the threshold at which the most generalist species in the graph keeps at least one edge of both the positive and the negative type.
# Post mortems for a sample of food webs (1, 5, 9): first with the fixed
# threshold, then with the adaptive one. print() mirrors the top-level
# autoprinting of the original call-by-call version.
for (w in c(1, 5, 9)) {
  temp <- toPostMortem(foodWebs[[w]], nodeSize = "None",
                       threshold = thresholdEdges)
  print(temp)
}
for (w in c(1, 5, 9)) {
  temp <- toPostMortem(foodWebs[[w]], nodeSize = "None",
                       threshold = "Adaptive")
  print(temp)
}